{
    int i, j;
    s_time_t now = NOW();
    /* Walk the counter struct as a flat array of atomic_t slots. */
    atomic_t *counters = (atomic_t *)&perfcounters;

    printk("Xen performance counters SHOW (now = 0x%08X:%08X)\n",
           (u32)(now>>32), (u32)now);

    switch ( perfc_info[i].type )
    {
    case TYPE_SINGLE:
        /* atomic_read() returns int, hence %d / %x (not %ld / %lx). */
        printk("%10d 0x%08x %s\n",
               atomic_read(&counters[0]), atomic_read(&counters[0]),
               perfc_info[i].name);
        counters += 1;
        break;
    case TYPE_CPU:
        for ( j = 0; j < smp_num_cpus; j++ )
            printk("%10d 0x%08x %s[CPU %02d]\n",
                   atomic_read(&counters[j]), atomic_read(&counters[j]),
                   perfc_info[i].name, j);
        counters += j; /* advance past one slot per CPU */
        break;
    case TYPE_ARRAY:
        for ( j = 0; j < perfc_info[i].nr_elements; j++ )
            printk("%10d 0x%08x %s[ARR %02d]\n",
                   atomic_read(&counters[j]), atomic_read(&counters[j]),
                   perfc_info[i].name, j);
        counters += j; /* advance past the whole array */
        break;
    }
* xen performance counters
*/
+#include <asm/atomic.h>
+
/*
* NOTE: new counters must be defined in perfc_defn.h
*
*/
/*
 * Counter storage is atomic_t so updates from multiple CPUs need no lock.
 * Single counters are declared as one-element arrays so all three kinds
 * can be walked uniformly as a flat atomic_t[] by the dump code.
 */
#define PERFCOUNTER( var, name ) \
  atomic_t var[1];
#define PERFCOUNTER_CPU( var, name ) \
  atomic_t var[NR_CPUS];
#define PERFCOUNTER_ARRAY( var, name, size ) \
  atomic_t var[size];
struct perfcounter_t
{
extern struct perfcounter_t perfcounters;

/*
 * Accessor macros: every read/write of a counter goes through the
 * atomic_* API.  Variants: plain = single counter, *c = this CPU's slot,
 * *a = explicit array index.
 */
#define perfc_value(x)    atomic_read(&perfcounters.x[0])
#define perfc_valuec(x)   atomic_read(&perfcounters.x[smp_processor_id()])
#define perfc_valuea(x,y) atomic_read(&perfcounters.x[y])
#define perfc_set(x,v)    atomic_set(&perfcounters.x[0], v)
#define perfc_setc(x,v)   atomic_set(&perfcounters.x[smp_processor_id()], v)
#define perfc_seta(x,y,v) atomic_set(&perfcounters.x[y], v)
#define perfc_incr(x)     atomic_inc(&perfcounters.x[0])
#define perfc_incrc(x)    atomic_inc(&perfcounters.x[smp_processor_id()])
#define perfc_incra(x,y)  atomic_inc(&perfcounters.x[y])
#define perfc_add(x,y)    atomic_add((y), &perfcounters.x[0])
#define perfc_addc(x,y)   atomic_add((y), &perfcounters.x[smp_processor_id()])
#define perfc_adda(x,y,z) atomic_add((z), &perfcounters.x[y])
PERFCOUNTER_CPU( sched_run2,            "sched: runs through scheduler" )
PERFCOUNTER_CPU( sched_ctx,             "sched: context switches" )
PERFCOUNTER( net_rx_capacity_drop,      "net rx capacity drop" )
PERFCOUNTER( net_rx_delivered,          "net rx delivered" )
PERFCOUNTER( net_rx_tlbflush,           "net rx tlb flushes" )
#include <asm/domain_page.h>
#include <asm/pgalloc.h>
#include <xeno/perfc.h>

#define BUG_TRAP ASSERT
#define notifier_call_chain(_a,_b,_c) ((void)0)
#define rtmsg_ifinfo(_a,_b,_c) ((void)0)
if ( (i = vif->rx_cons) == vif->rx_prod )
{
spin_unlock(&vif->domain->page_lock);
+ perfc_incr(net_rx_capacity_drop);
return;
}
rx = vif->rx_shadow_ring + i;
*/
if ( rx->flush_count == (unsigned short)
atomic_read(&tlb_flush_count[vif->domain->processor]) )
+ {
+ perfc_incr(net_rx_tlbflush);
flush_tlb_cpu(vif->domain->processor);
+ }
- out:
- make_rx_response(vif, rx->id, size, status, offset);
+ perfc_incr(net_rx_delivered);
/* record this so they can be billed */
vif->total_packets_received++;
vif->total_bytes_received += size;
+
+ out:
+ make_rx_response(vif, rx->id, size, status, offset);
}
/**